context_save(v);
- if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
+ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) &&
+ has_hvm_container_vcpu(v) && ctx->msr_bitmap_set )
amd_vpmu_unset_msr_bitmap(v);
return 1;
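
The guard added above keeps the AMD save path from touching the SVM MSR bitmap for vcpus without an HVM container, i.e. classic PV. For reference, a sketch of the predicate as the PVH series defines it in xen/include/xen/sched.h (quoted from memory, so verify against the tree):

/* A vcpu has an HVM container unless its domain is classic PV. */
#define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)
#define has_hvm_container_vcpu(v)   (has_hvm_container_domain((v)->domain))

This is why the check admits both HVM and PVH vcpus while filtering out PV ones.
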
ASSERT(!supported);
/* For all counters, enable guest-only mode for HVM guests */
- if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
- !(is_guest_mode(msr_content)) )
+ if ( has_hvm_container_vcpu(v) &&
+ (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
+ !is_guest_mode(msr_content) )
{
set_guest_mode(msr_content);
}
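
set_guest_mode() sets the GuestOnly bit in an AMD event-select MSR so the counter only counts while the vcpu runs in guest mode; the new has_hvm_container_vcpu() term skips that for classic PV vcpus. A sketch of the accessors, modelled on xen/arch/x86/hvm/svm/vpmu.c (GuestOnly is bit 40 of the Fam10h event selects; names may differ in your tree):

#define MSR_F10H_EVNTSEL_GO_SHIFT   40  /* GuestOnly: count only in guest mode */
#define is_guest_mode(msr)  ((msr) & (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
#define set_guest_mode(msr) ((msr) |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
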
apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;
- if ( !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
- amd_vpmu_set_msr_bitmap(v);
+ if ( has_hvm_container_vcpu(v) &&
+ !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+ amd_vpmu_set_msr_bitmap(v);
}
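
This branch runs when the guest enables its first counter: the vcpu is marked running and, for HVM containers only, the PMU MSRs are opened for direct guest access by editing the vcpu's SVM MSR bitmap. Roughly what the helper does, sketched after svm/vpmu.c (counters[], ctrls[] and num_counters are the per-family MSR tables assumed to exist there):

static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
{
    unsigned int i;
    struct amd_vpmu_context *ctxt = vcpu_vpmu(v)->context;

    for ( i = 0; i < num_counters; i++ )
    {
        /* Let counter accesses through; keep intercepting control writes. */
        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_NONE);
        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_WRITE);
    }

    ctxt->msr_bitmap_set = 1;
}
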
/* stop saving & restoring state if the guest stops the first counter */
apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
vpmu_reset(vpmu, VPMU_RUNNING);
- if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
- amd_vpmu_unset_msr_bitmap(v);
+ if ( has_hvm_container_vcpu(v) &&
+ ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+ amd_vpmu_unset_msr_bitmap(v);
release_pmu_ownship(PMU_OWNER_HVM);
}
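
The stop path is the mirror image: once the guest disables the counter, the LVTPC entry is masked, interception is restored, and PMU ownership is released. The inverse helper, under the same assumptions:

static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
{
    unsigned int i;
    struct amd_vpmu_context *ctxt = vcpu_vpmu(v)->context;

    for ( i = 0; i < num_counters; i++ )
    {
        /* Re-enable interception of every PMU MSR access. */
        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_RW);
        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_RW);
    }

    ctxt->msr_bitmap_set = 0;
}
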
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
+ if ( has_hvm_container_vcpu(v) &&
+ ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
amd_vpmu_unset_msr_bitmap(v);
xfree(vpmu->context);
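
In the destroy path the guard is not just an optimisation: the bitmap helpers reach into SVM state that overlays PV state inside struct arch_vcpu, so calling them on a PV vcpu would scribble over unrelated fields. Abridged from asm-x86/domain.h of this era (from memory):

struct arch_vcpu {
    /* ... */
    union {
        struct pv_vcpu pv_vcpu;    /* classic PV state */
        struct hvm_vcpu hvm_vcpu;  /* HVM/PVH state, including u.svm / u.vmx */
    };
    /* ... */
};
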
__core2_vpmu_save(v);
/* Unset the PMU MSR bitmap so the next access traps and triggers a lazy reload. */
- if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
+ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) &&
+ has_hvm_container_vcpu(v) && cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
return 1;
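
On the VMX side the bitmap is an actual page, v->arch.hvm_vmx.msr_bitmap, present only for HVM containers on hardware that supports it, hence the two-part condition. Its read and write halves sit at byte offsets 0 and 0x800, and a set bit means "intercept". A sketch of the unset helper after vpmu_core2.c (the fixed-counter table and a second loop over general-purpose counters are assumed):

#define msraddr_to_bitpos(x) (((x) & 0xffff) + ((x) >> 31) * 0x2000)

static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap)
{
    int i;

    for ( i = 0; i < core2_fix_counters.num; i++ )
    {
        /* Restore intercepts: read half at offset 0, write half at 0x800. */
        set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]), msr_bitmap);
        set_bit(msraddr_to_bitpos(core2_fix_counters.msr[i]),
                msr_bitmap + 0x800 / BYTES_PER_LONG);
    }
    /* ... and likewise for the general-purpose counter MSRs ... */
}
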
{
__core2_vpmu_load(current);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
- if ( cpu_has_vmx_msr_bitmap )
+ if ( has_hvm_container_vcpu(current) &&
+ cpu_has_vmx_msr_bitmap )
core2_vpmu_set_msr_bitmap(current->arch.hvm_vmx.msr_bitmap);
}
return 1;
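
This hunk sits in the lazy-load path taken on the first trapped PMU MSR access, which is why it operates on current rather than a passed-in vcpu. The capability half of the new condition is the usual VMX feature test; roughly, per asm-x86/hvm/vmx/vmcs.h:

#define cpu_has_vmx_msr_bitmap \
    (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
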
xfree(core2_vpmu_cxt->pmu_enable);
xfree(vpmu->context);
- if ( cpu_has_vmx_msr_bitmap )
+ if ( has_hvm_container_vcpu(v) && cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
release_pmu_ownship(PMU_OWNER_HVM);
vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
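
For context, the whole destroy path reads roughly as below after this change (an abridged reconstruction, not verbatim). Freeing vpmu->context before the bitmap call is fine because the helper only touches the VMX bitmap page:

static void core2_vpmu_destroy(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
        return;

    xfree(core2_vpmu_cxt->pmu_enable);
    xfree(vpmu->context);
    if ( has_hvm_container_vcpu(v) && cpu_has_vmx_msr_bitmap )
        core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
    release_pmu_ownship(PMU_OWNER_HVM);
    vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);
}
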